In [38]:
from numpy import *
from PIL import Image
import pickle
from pylab import *
import os
In [39]:
import sift
import dsift
dsift = reload(dsift)
import imtools
imtools = reload(imtools)
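Note: the cells below read precomputed .dsift files. As a rough sketch only (assuming PCV's imtools.get_imlist and dsift.process_image_dsift(imname, resultname, size, steps, resize) interface; the parameter values here are illustrative), those files could be generated like this:
imlist = imtools.get_imlist('train/')
for filename in imlist:
    featfile = filename[:-3] + 'dsift'
    # compute a dense grid of SIFT descriptors on a downsized copy of each image
    dsift.process_image_dsift(filename, featfile, 10, 5, resize=(50, 50))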
In [40]:
def read_gesture_features_labels(path):
    # make a list of the files with .dsift at the end
    featlist = [os.path.join(path, f) for f in os.listdir(path)
                if f.endswith('.dsift')]
    # read features
    features = []
    for featfile in featlist:
        l, d = sift.read_features_from_file(featfile)
        features.append(d.flatten())
    features = array(features)
    # generate labels
    labels = [featfile.split('/')[-1][0] for featfile in featlist]
    return features, array(labels)
In [62]:
features, labels = read_gesture_features_labels('train/')
test_features, test_labels = read_gesture_features_labels('test/')
classnames = unique(labels)
In [63]:
# the first letter of the file name is the label
print labels
In [64]:
import pca
In [65]:
V, S, m = pca.pca(features)  # projection directions, variances and mean of the training features
In [66]:
V = V[:2] # keep only the two most important directions (reduce to 2D)
features = array([dot(V, f-m) for f in features])
test_features = array([dot(V, f-m) for f in test_features])
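A quick sanity check (a sketch using the pylab functions already imported above): scatter the 2D projections per class to see how separable the gestures are after PCA.
figure()
colors = 'bgrcmykw'
for i, c in enumerate(classnames):
    ndx = where(labels == c)[0]
    # one color per gesture class in the 2D PCA space
    plot(features[ndx, 0], features[ndx, 1], '.', color=colors[i % len(colors)], label=str(c))
legend()
show()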
In [67]:
import bayes
import poisson
poisson = reload(poisson)
In [68]:
bc = poisson.BayesClassifier()
blist = [features[where(labels==c)[0]] for c in classnames]
bc.train(blist, classnames)
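For reference, a minimal sketch of the train/classify interface assumed here, using an independent Gaussian per class as PCV's bayes module does (the poisson variant swaps in a different per-class density; this is not the module's actual code):
class BayesSketch(object):
    def train(self, data, labels=None):
        # data is a list of arrays, one array of feature vectors per class
        if labels is None:
            labels = range(len(data))
        self.labels = labels
        self.mean = [mean(d, axis=0) for d in data]
        self.var = [var(d, axis=0) + 1e-6 for d in data]  # small offset avoids division by zero
    def classify(self, points):
        # log-density of a diagonal Gaussian for every class, evaluated at every point
        est = array([-0.5*sum((points-m)**2/v + log(v), axis=1)
                     for m, v in zip(self.mean, self.var)])
        ndx = est.argmax(axis=0)
        # return the most probable label for each point, plus the scores
        return array([self.labels[i] for i in ndx]), est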
In [69]:
res = bc.classify(test_features)[0]
print res
print test_labels
In [18]:
acc = sum(1.0*(res==test_labels)) / len(test_labels)
print 'Accuracy:', acc
This is better than kNN; what's going on? A kNN baseline on the same features (sketched below) makes the comparison concrete.
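Sketch of the baseline, assuming PCV's knn module with KnnClassifier(labels, samples) and classify(point, k):
import knn
k = 1
knn_classifier = knn.KnnClassifier(labels, features)
knn_res = array([knn_classifier.classify(test_features[i], k)
                 for i in range(len(test_labels))])
print 'kNN accuracy:', sum(1.0*(knn_res==test_labels)) / len(test_labels)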
In [17]:
def print_confusion(res, test_labels, classnames):
    n = len(classnames)
    class_ind = dict([(classnames[i], i) for i in range(n)])
    confuse = zeros((n, n))
    for i in range(len(test_labels)):
        confuse[class_ind[res[i]], class_ind[test_labels[i]]] += 1
    print 'Confusion matrix for'
    print classnames
    print confuse
In [15]:
print_confusion(res, test_labels, classnames)
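The same numbers are often easier to read as an image; a self-contained sketch using the pylab functions already imported (it rebuilds the matrix so print_confusion stays unchanged):
n = len(classnames)
class_ind = dict([(c, i) for i, c in enumerate(classnames)])
confuse = zeros((n, n))
for i in range(len(test_labels)):
    confuse[class_ind[res[i]], class_ind[test_labels[i]]] += 1
figure()
gray()
imshow(confuse, interpolation='nearest')
# label the axes with the gesture class names
xticks(arange(n), classnames)
yticks(arange(n), classnames)
title('Confusion matrix')
show()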
In [ ]: